192.168.3.11 k8s-master-01
192.168.3.12 k8s-master-02
192.168.3.13 k8s-master-03
# systemctl stop firewalld && systemctl disable firewalld
# setenforce 0
# vim /etc/selinux/config
SELINUX=disabled
# swapoff -a && sysctl -w vm.swappiness=0
# sed '/swap/d' -i /etc/fstab
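A quick sanity check that swap really is off and will stay off after a reboot (the kubelet refuses to start with swap enabled unless told otherwise):
# free -h | grep -i swap      # total/used should read 0
# grep swap /etc/fstab        # should print nothing after the sed above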
Install Kubernetes and Docker
# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOF
# setenforce 0
# yum install -y kubelet kubeadm kubectl docker
# systemctl enable kubelet && systemctl start kubelet
# systemctl enable docker && systemctl start docker
# cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
# sysctl --system
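If sysctl --system complains that the net.bridge.* keys do not exist, the br_netfilter kernel module is not loaded yet; a small extra step (not in the original write-up) to load it now and on every boot:
# modprobe br_netfilter
# echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
# sysctl --system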
# docker info | grep -i cgroup
Cgroup Driver: systemd
# cat /etc/systemd/system/kubelet.service.d/10-kubeadm.conf | grep "cgroup-driver"
Environment="KUBELET_CGROUP_ARGS=--cgroup-driver=systemd"
The kubelet's cgroup driver must match Docker's; here both report systemd.
If Docker reports cgroupfs instead, change the kubelet argument to match:
#sed -i "s/cgroup-driver=systemd/cgroup-driver=cgroupfs/g" /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
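After editing the drop-in, reload systemd and restart the kubelet so the new cgroup-driver flag is actually applied:
# systemctl daemon-reload
# systemctl restart kubelet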
# yum install keepalived -y
# vim /etc/keepalived/keepalived.conf
vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 51    # must be 1-255, and must not match the id of any other keepalived on the same network segment
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.3.19        # <----- the VIP
    }
}

virtual_server 192.168.3.19 80 {
    delay_loop 6
    lb_algo rr              # "loadbalance" is not a valid IPVS scheduler name; rr (round-robin) is used here instead
    lb_kind DR
    nat_mask 255.255.255.0
    persistence_timeout 0
    protocol TCP

    real_server 192.168.3.11 80 {
        weight 1
        TCP_CHECK {
            connect_timeout 3
        }
    }
    real_server 192.168.3.12 80 {
        weight 1
        TCP_CHECK {
            connect_timeout 3
        }
    }
    real_server 192.168.3.13 80 {
        weight 1
        TCP_CHECK {
            connect_timeout 3
        }
    }
}
# systemctl start keepalived
# systemctl enable keepalived
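Once keepalived is running on the first master, it should hold the VIP; a quick check on eth0:
# ip addr show eth0 | grep 192.168.3.19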
On k8s-master-02 (192.168.3.12), copy the config from master-01 and start keepalived (you may prefer to set state BACKUP and a lower priority on the standbys, but with identical settings VRRP still elects a single holder of the VIP):
# scp 192.168.3.11:/etc/keepalived/keepalived.conf /etc/keepalived/.
# systemctl start keepalived
# systemctl enable keepalived
On k8s-master-03 (192.168.3.13), do the same:
# scp 192.168.3.11:/etc/keepalived/keepalived.conf /etc/keepalived/.
# systemctl start keepalived
# systemctl enable keepalived
Check /var/log/messages on each node for entries like the following:
# tail -f /var/log/messages
bogus VRRP packet received on eth0 !!!
If this shows up, the virtual_router_id conflicts with another keepalived instance on the same network segment; pick a different id.
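If the log alone is not conclusive, watching the VRRP advertisements on the wire shows which router ids are already in use on the segment; a sketch with tcpdump (assuming it is installed):
# tcpdump -i eth0 -nn ip proto 112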
On k8s-master-01 (192.168.3.11), copy the etcd client certificates over from one of the etcd nodes:
# mkdir /etc/kubernetes/etcd
# scp 192.168.3.21:/etc/kubernetes/etcd/* /etc/kubernetes/etcd
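Before running kubeadm init, it is worth confirming the copied client certificates can actually reach the etcd cluster; a sketch using the etcdctl v2 flags (assuming etcdctl is installed on this master):
# etcdctl --ca-file /etc/kubernetes/etcd/ca.pem \
    --cert-file /etc/kubernetes/etcd/client.pem \
    --key-file /etc/kubernetes/etcd/client-key.pem \
    --endpoints https://192.168.3.21:2379,https://192.168.3.22:2379,https://192.168.3.23:2379 \
    cluster-health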
# vim /etc/kubernetes/config.yaml
apiVersion: kubeadm.k8s.io/v1alpha1
kind: MasterConfiguration
api:
  advertiseAddress: "192.168.3.11"
etcd:
  caFile: /etc/kubernetes/etcd/ca.pem
  certFile: /etc/kubernetes/etcd/client.pem
  keyFile: /etc/kubernetes/etcd/client-key.pem
  endpoints:
  - "https://192.168.3.21:2379"
  - "https://192.168.3.22:2379"
  - "https://192.168.3.23:2379"
networking:
  podSubnet: 10.244.0.0/16
apiServerCertSANs:
- "192.168.3.11"
- "192.168.3.12"
- "192.168.3.13"
- "192.168.3.19"
apiServerExtraArgs:
  endpoint-reconciler-type: lease
# swapoff -a
# kubeadm init --config /etc/kubernetes/config.yaml
Give it a moment. If the init fails, run kubeadm reset ; rm -rf /var/lib/etcd/* to wipe the kubeadm state, also clear the etcd data on the three etcd servers (rm -rf /var/lib/etcd/*), and then run kubeadm init again.
# mkdir -p $HOME/.kube
# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
# chown $(id -u):$(id -g) $HOME/.kube/config
# wget https://raw.githubusercontent.com/coreos/flannel/c5d10c8/Documentation/kube-flannel.yml -O /root/kube-flannel.yml
# kubectl apply -f /root/kube-flannel.yml
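Flannel and the DNS add-on take a short while to come up; watching the kube-system pods shows when the node is about to report Ready:
# kubectl get pods -n kube-system -o wide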
# kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-master-01 Ready master 36m v1.11.1
On k8s-master-02 (192.168.3.12), copy the certificates and config from master-01, then delete the copied apiserver certificate so kubeadm regenerates it with this node's own IP:
# scp -r 192.168.3.11:/etc/kubernetes/pki/* /etc/kubernetes/pki/.
# rm -f /etc/kubernetes/pki/apiserver.*
# scp 192.168.3.11:/etc/kubernetes/config.yaml /etc/kubernetes/.
# mkdir /etc/kubernetes/etcd
# scp -r 192.168.3.11:/etc/kubernetes/etcd/* /etc/kubernetes/etcd/.
# vim /etc/kubernetes/config.yaml
apiVersion: kubeadm.k8s.io/v1alpha1
kind: MasterConfiguration
api:
  advertiseAddress: "192.168.3.12" # <------ change this to k8s-master-02's IP
etcd:
  caFile: /etc/kubernetes/etcd/ca.pem
  certFile: /etc/kubernetes/etcd/client.pem
  keyFile: /etc/kubernetes/etcd/client-key.pem
  endpoints:
  - "https://192.168.3.21:2379"
  - "https://192.168.3.22:2379"
  - "https://192.168.3.23:2379"
networking:
  podSubnet: 10.244.0.0/16
apiServerCertSANs:
- "192.168.3.11"
- "192.168.3.12"
- "192.168.3.13"
- "192.168.3.19"
apiServerExtraArgs:
  endpoint-reconciler-type: lease
# swapoff -a
# kubeadm init --config /etc/kubernetes/config.yaml
Give it a moment. If it fails, run kubeadm reset to wipe the kubeadm state, also clear the etcd data (rm -rf /var/lib/etcd/* on the etcd servers), and then run kubeadm init again.
# mkdir -p $HOME/.kube
# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
# chown $(id -u):$(id -g) $HOME/.kube/config
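At this point master-02 should be able to talk to the cluster with its own kubeconfig; a quick sanity check run locally on k8s-master-02:
# kubectl get node
# kubectl get pods -n kube-system -o wide | grep k8s-master-02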
On k8s-master-03 (192.168.3.13), repeat the same steps: copy the certificates and config from master-01, and delete the copied apiserver certificate so kubeadm regenerates it with this node's own IP:
# scp -r 192.168.3.11:/etc/kubernetes/pki/* /etc/kubernetes/pki/.
# rm -f /etc/kubernetes/pki/apiserver.*
# scp 192.168.3.11:/etc/kubernetes/config.yaml /etc/kubernetes/.
# mkdir /etc/kubernetes/etcd
# scp -r 192.168.3.11:/etc/kubernetes/etcd/* /etc/kubernetes/etcd/.
# vim /etc/kubernetes/config.yaml
apiVersion: kubeadm.k8s.io/v1alpha1
kind: MasterConfiguration
api:
  advertiseAddress: "192.168.3.13" # <------ change this to k8s-master-03's IP
etcd:
  caFile: /etc/kubernetes/etcd/ca.pem
  certFile: /etc/kubernetes/etcd/client.pem
  keyFile: /etc/kubernetes/etcd/client-key.pem
  endpoints:
  - "https://192.168.3.21:2379"
  - "https://192.168.3.22:2379"
  - "https://192.168.3.23:2379"
networking:
  podSubnet: 10.244.0.0/16
apiServerCertSANs:
- "192.168.3.11"
- "192.168.3.12"
- "192.168.3.13"
- "192.168.3.19"
apiServerExtraArgs:
  endpoint-reconciler-type: lease
# swapoff -a
# kubeadm init --config /etc/kubernetes/config.yaml
Give it a moment. If it fails, run kubeadm reset to wipe the kubeadm state, also clear the etcd data (rm -rf /var/lib/etcd/* on the etcd servers), and then run kubeadm init again.
# mkdir -p $HOME/.kube
# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
# chown $(id -u):$(id -g) $HOME/.kube/config
Log in to k8s-master-01 (192.168.3.11) and check that all three masters have joined:
# kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-master-01 Ready master 50m v1.11.1
k8s-master-02 Ready master 38m v1.11.1
k8s-master-03 Ready master 36m v1.11.1
# kubectl get cs
NAME STATUS MESSAGE ERROR
controller-manager Healthy ok
scheduler Healthy ok
etcd-1 Healthy {"health": "true"}
etcd-0 Healthy {"health": "true"}
etcd-2 Healthy {"health": "true"}
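Finally, it is worth confirming the API server also answers through the keepalived VIP (the VIP is already listed in apiServerCertSANs, so TLS verification should pass); a sketch, assuming the default secure port 6443:
# kubectl --kubeconfig /etc/kubernetes/admin.conf --server https://192.168.3.19:6443 get nodes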